# 地统计R实现 (Geostatistics in R) -- commented out: a bare text line is a runtime error in R

# Load packages (pacman::p_load installs any that are missing, then attaches):
library(pacman)
# FIX: `raster` was listed twice in the original call; duplicate removed.
p_load(sp,raster,rgeos,rgdal,rasterVis,fs,sf,tidyverse,
       spatstat,maps,maptools)

# Project helper functions (ppp_spatstat, raster_utm, zero_1, ...):
source("E:/sjdata/code/sdmenm.R")
setwd("D:/xh2/")
# Occurrence points per continent; columns 2:3 are assumed to be
# longitude/latitude -- confirm against the CSV layout.
# NOTE(review): `na` and `as` shadow base R functions of the same name.
sa <- read.csv("./f1_points/xh/xh_sa.csv")[,2:3]   # South America
oc <- read.csv("./f1_points/xh/xh_oc.csv")[,2:3]   # Oceania
na <- read.csv("./f1_points/xh/xh_na.csv")[,2:3]   # North America
as <- read.csv("./f1_points/xh/xh_as.csv")[,2:3]   # Asia
# Environmental covariate rasters used throughout:
az <- c("BIO10","BIO11","BIO12")
azs <- paste0("./f2_envs/" ,az,".tif")
envg<- stack(azs) 
plot(oc)
## Assess the spatial distribution pattern of the Asian points: ####
# Using Asia as the example:
# Convert the data format (ppp_spatstat comes from sdmenm.R):
source("D:/HIDATA/项目代码xmind管理/share_code/sdmenm.R")
asp <- ppp_spatstat(as)
asp.km <- rescale(asp, 1000, "km")

# Evaluate the L-function and G-function of the data:
L <- Lest(asp.km)
plot(L, las=1, legendargs=list(cex=0.8, xpd=TRUE, inset=c(1.01, 0) ))
## Plotting L(r) - r makes the departure from CSR easier to read:
plot(L, . -r ~ r, main=NULL, las=1, legendargs=list(cex=0.6, xpd=TRUE, inset=c(0, 0) ))
plot(L, . -r ~ r, main=NULL, las=1)

## Use the inhomogeneous L-hat to account for non-stationarity:
library(spatstat)
plot(envelope(asp.km, Linhom, correction = "Ripley", verbose = T), 
     . - r ~ r, main = "Sigma 0.75")
lhat <- Linhom(asp.km, correction="Ripley")
plot(lhat, . -r ~ r, main=NULL, las=1,xlim = c(0, 300))

## The pair correlation function g does not accumulate over distance,
## so it may be the more informative diagnostic:
g  <- pcf(asp.km)
plot(g, main=NULL, las=1, legendargs=list(cex=0.8, xpd=TRUE, inset=c(0.3, 0.1) ))
plot(g, . -r ~ r, main=NULL, las=1,xlim = c(0, 50), 
     legendargs=list(cex=0.8, xpd=TRUE, inset=c(0.3, 0.1) ))

## Measure clustering of the records in 3-D environmental space:
# Values are rescaled to 0-1 before the evaluation;
# the result is hard to interpret: roughly random at small scales but
# clustered at larger scales;
# NOTE(review): extract() on a 3-layer stack yields 3 columns, so the
# [,4:6] below only works if zero_1() prepends columns -- confirm
# against sdmenm.R.

ass <- raster::extract(envg,as) %>% na.omit()
ass <- zero_1(ass)[,4:6]
names(ass) <- c("BIO10","BIO11","BIO12")
threeDPpp <- pp3(ass$BIO10, ass$BIO11, ass$BIO12, box3(c(0,1)))
plot3d(ass)
threeDPppKest <- K3est(threeDPpp)
plot(threeDPppKest, . -r ~ r, main=NULL, las=1)

## Use density estimation to assess intensity of the modelling variables:
# Method 1: quadrat counts on a 10 x 10 grid:
Q <- quadratcount(asp.km, nx= 10, ny=10)
plot(asp.km, pch=20, cols="grey70", main=NULL)  # Plot points
plot(Q, add=TRUE)  # Add quadrat grid
# Intensity (points per unit area) of each quadrat:
Q.d <- intensity(Q)
# Plot the density
plot(intensity(Q, image=TRUE), main=NULL, las=1)  # Plot density raster
plot(asp.km, pch=20, cex=0.6, col=rgb(0,0,0,.5), add=TRUE)  # Add points

## Method 2: kernel density estimate:

X<-asp.km
plot(X)
plot(density(X, 10)) ## the second argument is the smoothing bandwidth:
contour(density(X,10), axes=FALSE)
## Consider how covariates constrain the distribution via the geographic
# mapping; this analysis also rests on the point-density probability;

## Cut a covariate into classes, then use the classes as a tessellation
## for quadratcount;

# Project each covariate raster to UTM (raster_utm is from sdmenm.R),
# convert to a spatstat image, and rescale metres -> km:
pzbio10 <- raster_utm(as,envg$BIO10)
pzbio10 <- as.im(pzbio10)
hist(pzbio10 , main=NULL, las=1)
pzbio10.km <- rescale(pzbio10, 1000, "km")

pzbio11 <- raster_utm(as,envg$BIO11)
pzbio11 <- as.im(pzbio11) 
hist(pzbio11 , main=NULL, las=1)
pzbio11.km <- rescale(pzbio11, 1000, "km") 

pzbio12 <- raster_utm(as,envg$BIO12)
# BIO12 is log-transformed here (presumably because precipitation is
# right-skewed) -- note the other continents are inconsistent on this:
pzbio12 <- as.im(pzbio12) %>% log()
hist(pzbio12 , main=NULL, las=1)
pzbio12.km <- rescale(pzbio12, 1000, "km") 

# Quartile breaks of BIO10 define a 4-class tessellation:
b <- quantile(pzbio10.km, probs = (0:4)/4) %>% round(3)
Zcut <- cut(pzbio10.km, breaks = b)
V <- tess(image = Zcut) 
plot(V)
plot(asp.km, add = TRUE, pch = "+")
## Quadrat counts per covariate class:
qb <- quadratcount(asp.km, tess = V)
plot(qb)
## Visualise the dependence of point density on the covariate:
plot(rhohat(asp.km, pzbio10.km,method="ratio"))
bio10th0 <- rhohat(asp.km, pzbio10.km)
## Use a hyperframe to explore several covariates in one call:
## one-dimensional rho-hat per covariate:
H <- hyperframe(points=list(asp.km,asp.km,asp.km),
                Image=list(pzbio10.km,pzbio11.km,pzbio12.km)
                )

rhos <- with(H, rhohat(points, Image))

## Assess the distribution characteristics of South America: #####
# Keep only points on non-NA environmental cells; after cbind(sa) the
# columns are BIO10, BIO11, BIO12 plus the two coordinate columns, so
# [,4:5] recovers the filtered coordinates:
sav <- raster::extract(envg,sa) %>% cbind(sa) %>% na.omit()
sa <- sav[,4:5] %>% data.frame()



sap <- ppp_spatstat(sa)
sap.km <- rescale(sap, 1000, "km")

# Evaluate the L-function and G-function of the data:
L <- Lest(sap.km)
plot(L, las=1, legendargs=list(cex=0.8, xpd=TRUE, inset=c(1.01, 0) ))
## Plotting L(r) - r makes the departure from CSR easier to read:
plot(L, . -r ~ r, main=NULL, las=1, legendargs=list(cex=0.6, xpd=TRUE, inset=c(0, 0) ))
plot(L, . -r ~ r, main=NULL, las=1)

## Use the inhomogeneous L-hat to account for non-stationarity:
library(spatstat)
plot(envelope(sap.km, Linhom, correction = "Ripley", verbose = T), 
     . - r ~ r, main = "Sigma 0.75")
lhat <- Linhom(sap.km, correction="Ripley")
plot(lhat, . -r ~ r, main=NULL, las=1,xlim = c(0, 300))
plot(sap.km)
## The pair correlation function g does not accumulate over distance:
g  <- pcf(sap.km)
plot(g, main=NULL, las=1, legendargs=list(cex=0.8, xpd=TRUE, inset=c(0.3, 0.1) ))
plot(g, . -r ~ r, main=NULL, las=1,xlim = c(0, 50), 
     legendargs=list(cex=0.8, xpd=TRUE, inset=c(0.3, 0.1) ))
plot(sap.km)
## Measure clustering of the records in 3-D environmental space:
# Values are rescaled to 0-1 before the evaluation;
# NOTE(review): [,4:6] on a 3-column extract result only works if
# zero_1() prepends columns -- confirm against sdmenm.R.
az <- c("BIO10","BIO11","BIO12")
azs <- paste0("./f2_envs/" ,az,".tif")
envg<- stack(azs) 
ass <- raster::extract(envg,sa) %>% na.omit()
ass <- zero_1(ass)[,4:6]
names(ass) <- c("BIO10","BIO11","BIO12")
threeDPpp <- pp3(ass$BIO10, ass$BIO11, ass$BIO12, box3(c(0,1)))
plot3d(ass)
threeDPppKest <- K3est(threeDPpp)
plot(threeDPppKest, . -r ~ r, main=NULL, las=1)

## Use density estimation to assess intensity of the modelling variables:
# Method 1: quadrat counts on a 10 x 10 grid:
Q <- quadratcount(sap.km, nx= 10, ny=10)
plot(sap.km, pch=20, cols="grey70", main=NULL)  # Plot points
plot(Q, add=TRUE)  # Add quadrat grid
# Intensity (points per unit area) of each quadrat:
Q.d <- intensity(Q)
# Plot the density
plot(intensity(Q, image=TRUE), main=NULL, las=1)  # Plot density raster
plot(sap.km, pch=20, cex=0.6, col=rgb(0,0,0,.5), add=TRUE)  # Add points

## Method 2: kernel density estimate:

X<-sap.km
plot(X)
plot(density(X, 10)) ## the second argument is the smoothing bandwidth:
contour(density(X,10), axes=FALSE)
## Consider how covariates constrain the distribution via the geographic
# mapping; this also rests on the point-density probability;

## Cut a covariate into classes, then use the classes as a tessellation
## for quadratcount;

# Project each covariate raster to UTM, convert to a spatstat image,
# and rescale metres -> km:
pzbio10 <- raster_utm(sa,envg$BIO10)
pzbio10 <- as.im(pzbio10)
hist(pzbio10 , main=NULL, las=1)
pzbio10.km <- rescale(pzbio10, 1000, "km")

pzbio11 <- raster_utm(sa,envg$BIO11)
pzbio11 <- as.im(pzbio11) 
hist(pzbio11 , main=NULL, las=1)
pzbio11.km <- rescale(pzbio11, 1000, "km") 

plot(envg$BIO12)
# NOTE(review): unlike the Asia/Oceania sections, BIO12 is NOT
# log-transformed here -- confirm this inconsistency is intentional.
pzbio12 <- raster_utm(sa,envg$BIO12)
pzbio12 <- as.im(pzbio12) 
hist(pzbio12 , main=NULL, las=1)
pzbio12.km <- rescale(pzbio12, 1000, "km")  

# Quartile breaks of BIO10 define a 4-class tessellation:
b <- quantile(pzbio10.km, probs = (0:4)/4) %>% round(3)
Zcut <- cut(pzbio10.km, breaks = b)
V <- tess(image = Zcut) 
plot(V)
plot(sap.km, add = TRUE, pch = "+")
## Quadrat counts per covariate class:
qb <- quadratcount(sap.km, tess = V)
plot(qb)
## Visualise the dependence of point density on the covariate:
plot(rhohat(sap.km, pzbio10.km,method="ratio"))
# NOTE(review): the name says bio10 but the covariate is pzbio12.km --
# confirm which was intended:
bio10th0 <- rhohat(sap.km, pzbio12.km)
## Use a hyperframe to explore several covariates in one call:
## one-dimensional rho-hat per covariate:
H <- hyperframe(points=list(sap.km,sap.km,sap.km),
                Image=list(pzbio10.km,pzbio11.km,pzbio12.km)
                )

rhos <- with(H, rhohat(points, Image))
plot(rhos)

### Assess the distribution pattern for Oceania ####
# Keep only points on non-NA cells; [,4:5] recovers the coordinate
# columns (3 extracted layers + 2 coordinate columns):
ocv <- raster::extract(envg,oc) %>% cbind(oc) %>% na.omit()
oc <- ocv[,4:5] %>% data.frame()

ocp <- ppp_spatstat(oc)
ocp.km <- rescale(ocp, 1000, "km")
plot(ocp.km)
# Evaluate the L-function and G-function of the data:
L <- Lest(ocp.km)
plot(L, las=1, legendargs=list(cex=0.8, xpd=TRUE, inset=c(1.01, 0) ))
## Plotting L(r) - r makes the departure from CSR easier to read:
plot(L, . -r ~ r, main=NULL, las=1, legendargs=list(cex=0.6, xpd=TRUE, inset=c(0, 0) ))
plot(L, . -r ~ r, main=NULL, las=1)

## Use the inhomogeneous L-hat to account for non-stationarity:
library(spatstat)
plot(envelope(ocp.km, Linhom, correction = "Ripley", verbose = T), 
     . - r ~ r, main = "Sigma 0.75")
lhat <- Linhom(ocp.km, correction="Ripley")
plot(lhat, . -r ~ r, main=NULL, las=1,xlim = c(0, 300))

## The pair correlation function g does not accumulate over distance:
g  <- pcf(ocp.km)
plot(g, main=NULL, las=1, legendargs=list(cex=0.8, xpd=TRUE, inset=c(0.3, 0.1) ))
plot(g, . -r ~ r, main=NULL, las=1,xlim = c(0, 50), 
     legendargs=list(cex=0.8, xpd=TRUE, inset=c(0.3, 0.1) ))

## Measure clustering of the records in 3-D environmental space:
# Values are rescaled to 0-1 before the evaluation;
# NOTE(review): [,4:6] on a 3-column extract result only works if
# zero_1() prepends columns -- confirm against sdmenm.R.
az <- c("BIO10","BIO11","BIO12")
azs <- paste0("./f2_envs/" ,az,".tif")
envg<- stack(azs) 
ass <- raster::extract(envg,oc) %>% na.omit()
ass <- zero_1(ass)[,4:6]
names(ass) <- c("BIO10","BIO11","BIO12")
threeDPpp <- pp3(ass$BIO10, ass$BIO11, ass$BIO12, box3(c(0,1)))
plot(threeDPpp)
threeDPppKest <- K3est(threeDPpp)
plot(threeDPppKest, . -r ~ r, main=NULL, las=1)
plot(threeDPppKest,  main=NULL, las=1)
## Use density estimation to assess intensity of the modelling variables:
# Method 1: quadrat counts on a 10 x 10 grid:
Q <- quadratcount(ocp.km, nx= 10, ny=10)
plot(ocp.km, pch=20, cols="grey70", main=NULL)  # Plot points
plot(Q, add=TRUE)  # Add quadrat grid
# Intensity (points per unit area) of each quadrat:
Q.d <- intensity(Q)
# Plot the density
plot(intensity(Q, image=TRUE), main=NULL, las=1)  # Plot density raster
plot(ocp.km, pch=20, cex=0.6, col=rgb(0,0,0,.5), add=TRUE)  # Add points

## Method 2: kernel density estimate:

X<-ocp.km
plot(X)
plot(density(X, 10)) ## the second argument is the smoothing bandwidth:
contour(density(X,10), axes=FALSE)
## Consider how covariates constrain the distribution via the geographic
# mapping; this also rests on the point-density probability;

## Cut a covariate into classes, then use the classes as a tessellation
## for quadratcount;

# Project each covariate raster to UTM, convert to a spatstat image,
# and rescale metres -> km:
pzbio10 <- raster_utm(oc,envg$BIO10)
pzbio10 <- as.im(pzbio10) 
hist(pzbio10 , main=NULL, las=1)
pzbio10.km <- rescale(pzbio10, 1000, "km")

pzbio11 <- raster_utm(oc,envg$BIO11)
pzbio11 <- as.im(pzbio11) 
hist(pzbio11 , main=NULL, las=1)
pzbio11.km <- rescale(pzbio11, 1000, "km") 

pzbio12 <- raster_utm(oc,envg$BIO12)
# BIO12 is log-transformed (matching the Asia section):
pzbio12 <- as.im(pzbio12) %>% log()
hist(pzbio12 , main=NULL, las=1)
pzbio12.km <- rescale(pzbio12, 1000, "km") 

# Quartile breaks of BIO10 define a 4-class tessellation:
b <- quantile(pzbio10.km, probs = (0:4)/4) %>% round(3)
Zcut <- cut(pzbio10.km, breaks = b)
V <- tess(image = Zcut) 
plot(V)
plot(ocp.km, add = TRUE, pch = "+")
## Quadrat counts per covariate class:
qb <- quadratcount(ocp.km, tess = V)
plot(qb)
## Visualise the dependence of point density on the covariate:
plot(rhohat(ocp.km, pzbio10.km,method="ratio"))
# NOTE(review): the name says bio10 but the covariate is pzbio12.km --
# confirm which was intended:
bio10th0 <- rhohat(ocp.km, pzbio12.km)
## Use a hyperframe to explore several covariates in one call:
## one-dimensional rho-hat per covariate:
plot(ocp.km)
H <- hyperframe(points=list(ocp.km,ocp.km,ocp.km),
                Image=list(pzbio10.km,pzbio11.km,pzbio12.km)
                )

rhos <- with(H, rhohat(points, Image))
plot(rhos)
# Sanity-check the filtered points against a world map:
plot(oc)
map(add =T)

### North America ####
# Keep only points on non-NA cells; [,4:5] recovers the coordinates:
nav <- raster::extract(envg,na) %>% cbind(na) %>% na.omit()
na <- nav[,4:5] %>% data.frame()


nap <- ppp_spatstat(na)
nap.km <- rescale(nap, 1000, "km")

# Evaluate the L-function and G-function of the data:
L <- Lest(nap.km)
plot(L, las=1, legendargs=list(cex=0.8, xpd=TRUE, inset=c(1.01, 0) ))
## Plotting L(r) - r makes the departure from CSR easier to read:
plot(L, . -r ~ r, main=NULL, las=1, legendargs=list(cex=0.6, xpd=TRUE, inset=c(0, 0) ))
plot(L, . -r ~ r, main=NULL, las=1)

## Use the inhomogeneous L-hat to account for non-stationarity:
library(spatstat)
plot(envelope(nap.km, Linhom, correction = "Ripley", verbose = T), 
     . - r ~ r, main = "Sigma 0.75")
lhat <- Linhom(nap.km, correction="Ripley")
plot(lhat, . -r ~ r, main=NULL, las=1,xlim = c(0, 300))

## The pair correlation function g does not accumulate over distance:
g  <- pcf(nap.km)
plot(g, main=NULL, las=1, legendargs=list(cex=0.8, xpd=TRUE, inset=c(0.3, 0.1) ))
plot(g, . -r ~ r, main=NULL, las=1,xlim = c(0, 50), 
     legendargs=list(cex=0.8, xpd=TRUE, inset=c(0.3, 0.1) ))

## Measure clustering of the records in 3-D environmental space:
# Values are rescaled to 0-1 before the evaluation;
# NOTE(review): [,4:6] on a 3-column extract result only works if
# zero_1() prepends columns -- confirm against sdmenm.R.
az <- c("BIO10","BIO11","BIO12")
azs <- paste0("./f2_envs/" ,az,".tif")
envg<- stack(azs) 
ass <- raster::extract(envg,na) %>% na.omit()
ass <- zero_1(ass)[,4:6]
names(ass) <- c("BIO10","BIO11","BIO12")
threeDPpp <- pp3(ass$BIO10, ass$BIO11, ass$BIO12, box3(c(0,1)))
plot3d(ass)
threeDPppKest <- K3est(threeDPpp)
plot(threeDPppKest, . -r ~ r, main=NULL, las=1)

## Use density estimation to assess intensity of the modelling variables:
# Method 1: quadrat counts on a 10 x 10 grid:
Q <- quadratcount(nap.km, nx= 10, ny=10)
plot(nap.km, pch=20, cols="grey70", main=NULL)  # Plot points
plot(Q, add=TRUE)  # Add quadrat grid
# Intensity (points per unit area) of each quadrat:
Q.d <- intensity(Q)
# Plot the density
plot(intensity(Q, image=TRUE), main=NULL, las=1)  # Plot density raster
plot(nap.km, pch=20, cex=0.6, col=rgb(0,0,0,.5), add=TRUE)  # Add points

## Method 2: kernel density estimate:

X<-nap.km
plot(X)
plot(density(X, 10)) ## the second argument is the smoothing bandwidth:
contour(density(X,10), axes=FALSE)
## Consider how covariates constrain the distribution via the geographic
# mapping; this also rests on the point-density probability;

## Cut a covariate into classes, then use the classes as a tessellation
## for quadratcount;

# Project each covariate raster to UTM, convert to a spatstat image,
# and rescale metres -> km:
pzbio10 <- raster_utm(na,envg$BIO10)
pzbio10 <- as.im(pzbio10)
hist(pzbio10 , main=NULL, las=1)
pzbio10.km <- rescale(pzbio10, 1000, "km")

pzbio11 <- raster_utm(na,envg$BIO11)
pzbio11 <- as.im(pzbio11) 
hist(pzbio11 , main=NULL, las=1)
pzbio11.km <- rescale(pzbio11, 1000, "km") 

# NOTE(review): BIO12 is not log-transformed here, unlike the
# Asia/Oceania sections -- confirm this is intentional.
pzbio12 <- raster_utm(na,envg$BIO12)
pzbio12 <- as.im(pzbio12) 
plot(pzbio12)
hist(pzbio12 , main=NULL, las=1)
pzbio12.km <- rescale(pzbio12, 1000, "km") 

# Quartile breaks of BIO10 define a 4-class tessellation:
b <- quantile(pzbio10.km, probs = (0:4)/4) %>% round(3)
Zcut <- cut(pzbio10.km, breaks = b)
V <- tess(image = Zcut) 
plot(V)
plot(nap.km, add = TRUE, pch = "+")
## Quadrat counts per covariate class:
qb <- quadratcount(nap.km, tess = V)
plot(qb)
## Visualise the dependence of point density on the covariate:
plot(rhohat(nap.km, pzbio10.km,method="ratio"))
bio10th0 <- rhohat(nap.km, pzbio10.km)
## Use a hyperframe to explore several covariates in one call:
## one-dimensional rho-hat per covariate:
H <- hyperframe(points=list(nap.km,nap.km,nap.km),
                Image=list(pzbio10.km,pzbio11.km,pzbio12.km)
                )

rhos <- with(H, rhohat(points, Image))
plot(rhos)



# Re-extract environmental values for every continent, keeping the
# coordinate columns and dropping points on NA cells:
asv <- raster::extract(envg,as) %>% cbind(as) %>% na.omit()

sav <- raster::extract(envg,sa) %>% cbind(sa) %>% na.omit()

ocv <- raster::extract(envg,oc) %>% cbind(oc) %>% na.omit()

nav <- raster::extract(envg,na) %>% cbind(na) %>% na.omit()
# Export the South-American table for use elsewhere:
write.csv(sav,"C:/Users/admin/Desktop/sav.csv")

# 点统计的一般分析源码 (General point-pattern analysis source code) -- commented out: bare text is not valid R

# Final version of the point-pattern analysis:
# Concepts:
# Spatial analysis splits into first-order and second-order:
# first-order analysis may mask local spatial autocorrelation;
# first-order analysis asks whether the nearest neighbours under CSR are
# consistent with the observed geographic distribution;
# second-order analysis, based on nearest-neighbour buffers, assesses
# clustering / spatial correlation across a range of distance scales;
library(spatstat)
# Tutorial data (starbucks points + `ma` window, presumably
# Massachusetts) from mgimond's Spatial repository:
load(url("http://github.com/mgimond/Spatial/raw/master/Data/ppa.RData"))
marks(starbucks)  <- NULL              # drop marks, keep locations only
Window(starbucks) <- ma                # clip points to the study window
plot(starbucks, main=NULL, cols=rgb(0,0,0,.2), pch=20)


# First-order density analysis not only classifies the pattern but can
# also build an intensity raster from the density;
# quadratcount builds the density classification grid:
Q <- quadratcount(starbucks, nx= 6, ny=3)
plot(starbucks, pch=20, cols="grey70", main=NULL)  # Plot points
plot(Q, add=TRUE)  # Add quadrat grid
# Intensity of each quadrat:
Q.d <- intensity(Q)
# Plot the density
plot(intensity(Q, image=TRUE), main=NULL, las=1)  # Plot density raster
plot(starbucks, pch=20, cex=0.6, col=rgb(0,0,0,.5), add=TRUE)  # Add points

# Build raster grid blocks with the raster package:
library(rspatial)
city <- sp_data('city')
crime <- sp_data('crime')
## Points -> count (density) raster at 1 km resolution:
r <- raster(city)
res(r) <- 1000
plot(r)
quads <- as(r, 'SpatialPolygons')
plot(quads, add=TRUE)
points(crime, col='red', cex=.5)
nc <- rasterize(coordinates(crime), r, fun='count', background=0)
plot(nc)
plot(city, add=TRUE)
## Frequency table of cell counts:
# BUG FIX: the original called freq(ncrimes, ...), but `ncrimes` is never
# defined in this script -- the count raster built above is `nc`.
f <- freq(nc, useNA='no')
plot(f, pch=20)


## Visualise the point distribution as a density surface:
## Adapted from:
# https://www.r-bloggers.com/2015/09/spatstat-an-introduction-and-measurements-with-bio7/

data(swedishpines)
X<-swedishpines
plot(X)
plot(density(swedishpines, 10)) ## the second argument is the bandwidth:
contour(density(X,10), axes=FALSE)
## Consider how covariates constrain the distribution via the geographic
# mapping; this also rests on the point-density probability;
data(bei)
slope <- bei.extra$grad
par(mfrow = c(1, 2))
plot(bei)
# NOTE(review): the second panel repeats plot(bei); plot(slope) was
# probably intended since `slope` is computed above -- confirm.
plot(bei)
## Cut the slope into classes, then map quadratcount over the classes;
data(bei)
Z <- bei.extra$grad
b <- quantile(Z, probs = (0:4)/4)
Zcut <- cut(Z, breaks = b, labels = 1:4)
V <- tess(image = Zcut) 
plot(V)
plot(bei, add = TRUE, pch = "+")
## Quadrat counts per slope class:
qb <- quadratcount(bei, tess = V)
plot(qb)
## Visualise the dependence of point density on slope:
plot(rhohat(bei, slope))


## Rescale the data; at large spatial extents km is the usual unit:
starbucks.km <- rescale(starbucks, 1000, "km")
ma.km <- rescale(ma, 1000, "km")
pop.km    <- rescale(pop, 1000, "km")
pop.lg.km <- rescale(pop.lg, 1000, "km")

## The point-pattern density can be grouped by a covariate:
brk  <- c( -Inf, 4, 6, 8 , Inf)  # Define the breaks
Zcut <- cut(pop.lg.km, breaks=brk, labels=1:4)  # Classify the raster
E    <- tess(image=Zcut)  # Create a tesselated surface
# Inspect the covariate tessellation:
plot(E, main="", las=1)
## Re-tally the counts within the covariate classes:
Q   <- quadratcount(starbucks.km, tess = E)  # Tally counts
## Re-plot using the covariate mapping:
plot(intensity(Q, image=TRUE), las=1, main=NULL)
plot(starbucks.km, pch=20, cex=0.6, col=rgb(1,1,1,.5), add=TRUE)
## Alternative colour scheme:
cl <-  interp.colours(c("lightyellow", "orange" ,"red"), E$n)
plot( intensity(Q, image=TRUE), las=1, col=cl, main=NULL)
plot(starbucks.km, pch=20, cex=0.6, col=rgb(0,0,0,.5), add=TRUE)

## Characterise the distribution with a kernel density surface;
# sigma = 50 means a 50 km bandwidth in the rescaled units;
K2 <- density(starbucks.km, sigma=50) # Using a 50km bandwidth
plot(K2, main=NULL, las=1)
contour(K2, add=TRUE)


## Estimate how intensity depends on the covariate (rho-hat):
# Very useful for judging the dependence of the distribution data on an
# environmental variable:
# Compute rho using the ratio method
rho <- rhohat(starbucks.km, pop.lg.km,  method="ratio")
# Generate rho vs covariate plot
plot(rho, las=1, main=NULL, legendargs=list(cex=0.8, xpd=TRUE, inset=c(1.01, 0)))
## If the covariate were the sole driver, the response intensity could
## be predicted from it directly:
pred <- predict(rho)
cl   <- interp.colours(c("lightyellow", "orange" ,"red"), 100) # Create color scheme
plot(pred, col=cl, las=1, main=NULL, gamma = 0.25)

## A follow-up worth doing: compare the predicted intensity against the
## actually observed intensity:
pred <- predict(rho)
cl   <- interp.colours(c("lightyellow", "orange" ,"red"), 100) # Create color scheme
plot(pred, col=cl, las=1, main=NULL, gamma = 0.25)

## A Poisson point-process model can also relate intensity to the covariate:
# Create the Poisson point process model
PPM1 <- ppm(starbucks.km ~ pop.lg.km)
# Plot the relationship
plot(effectfun(PPM1, "pop.lg.km", se.fit=TRUE), main=NULL, 
     las=1, legendargs=list(cex=0.8, xpd=TRUE, inset=c(1.01, 0) ))
PPM1
## Example syntax for multiple spatial covariates;
# NOTE(review): rcas.ppp / rpop.den / rcrroad.im / rcrinc.im are not
# defined in this script -- this line is a template only:
lng.ppm02 <- ppm(rcas.ppp, ~ offset(log(rpop)) + rinc + rroad, covariates = list(rpop = rpop.den, rroad = rcrroad.im, rinc = rcrinc.im))



## Distance-based assessment of the spatial pattern:
# First-order: compute observed inter-point distances, then build a CSR
# reference distribution:
ann.p <- mean(nndist(starbucks.km, k=1))

# Average nearest-neighbour distance as a function of neighbour order k
# -- very useful!
ANN <- apply(nndist(starbucks.km, k=1:100),2,FUN=mean)
plot(ANN ~ eval(1:100), type="b", main=NULL, las=1)
# Monte-Carlo simulation under CSR:

n     <- 99               # Number of simulations
ann.r <- vector(length = n) # Create an empty object to be used to store simulated ANN values
for (i in 1:n){
  rand.p   <- rpoint(n=starbucks.km$n, win=ma.km)  # Generate random point locations
  ann.r[i] <- mean(nndist(rand.p, k=1))  # Tally the ANN values
}
plot(rand.p, pch=16, main=NULL, cols=rgb(0,0,0,0.5))
## Compare the observed ANN against the simulated distribution:
# caveat: this assumes the environment is homogeneous, which in reality
# it is not, so this assessment method has a known weakness;
hist(ann.r, main=NULL, las=1, breaks=40, col="bisque", xlim=range(ann.p, ann.r))
abline(v=ann.p, col="blue")

# Improved test: bring the covariate into the simulation;
# note the f=pop.km argument below (intensity-weighted random points):
n     <- 599L
ann.r <- vector(length=n)
for (i in 1:n){
  rand.p   <- rpoint(n=starbucks.km$n, f=pop.km) 
  ann.r[i] <- mean(nndist(rand.p, k=1))
}
hist(ann.r, main=NULL, las=1, breaks=40, col="bisque", xlim=range(ann.p, ann.r))
abline(v=ann.p, col="blue")
## Statistical significance (Monte-Carlo p-value; the min() makes this
## effectively the smaller tail -- labelled one-tailed in the original):
N.greater <- sum(ann.r > ann.p)
p <- min(N.greater + 1, n + 1 - N.greater) / (n +1)
p


## Second-order assessment (assuming environmental homogeneity)
# For both the K and the L function, values above CSR indicate clustering;
# the advantage of the L function is readability at small spatial scales;
K <- Kest(starbucks.km)
plot(K, main=NULL, las=1, legendargs=list(cex=0.8, xpd=TRUE, inset=c(1.01, 0) ))

L <- Lest(starbucks.km, main=NULL)
plot(L, main=NULL, las=1, legendargs=list(cex=0.8, xpd=TRUE, inset=c(1.01, 0) ))
## Plotting L(r) - r makes the distribution pattern easier to interpret:
plot(L, . -r ~ r, main=NULL, las=1, legendargs=list(cex=0.8, xpd=TRUE, inset=c(1.01, 0) ))

## The pair correlation function, a later refinement, does not accumulate
## over distance;
# it can partly overcome the error caused by the homogeneity assumption:
# it measures point density in an annulus ("doughnut") around each point
# rather than the full disc;
g  <- pcf(starbucks.km)
plot(g, main=NULL, las=1, legendargs=list(cex=0.8, xpd=TRUE, inset=c(1.01, 0) ))

## 3-D K-function:
# this may allow assessing dependence between variables detached from
# the geographic point locations themselves:
threeDPpp <- pp3(runif(10), runif(10), runif(10), box3(c(0,1)))
plot(threeDPpp)
threeDPppKest <- K3est(threeDPpp)
plot(threeDPppKest)

## Second-order assessment (allowing environmental heterogeneity)
# A homogeneous Poisson point process assumes that point locations are
# independent (Poisson) and that the intensity does not differ across
# locations (homogeneous).
# An inhomogeneous Poisson point process keeps the independence
# assumption but lets the intensity vary from place to place.
# Density maps at several sigma levels reveal the inhomogeneity of the
# point density, i.e. view the density across a gradient of bandwidths;
par(mfrow = c(1, 3))  # set up to plot 3 plots on one row
# plot density at various levels of sigma
# NOTE(review): Liencres.pp is never defined in this script -- this
# section is copied from a tutorial and will not run as-is.
plot(density(Liencres.pp, sigma = 0.5), main = "Sigma 0.5")
plot(density(Liencres.pp, sigma = 0.75), main = "Sigma 0.75")
plot(density(Liencres.pp, sigma = 1), main = "Sigma 1.0")

# Use quadrat.test to assess heterogeneity of the point distribution:
set.seed(324324)  # set random seed for repeatability
# this result is sensitive to the window extent and the grid size:
(M <- quadrat.test(Liencres.pp, nx = 4, ny = 10, method = "MonteCarlo"))
plot(density(Liencres.pp), main = "Grid Counts")
plot(M, cex = 0.5, col = "white", add = T)


# calculate and plot inhomogeneous L-hat ; corrected L-hat for the data:
plot(envelope(Liencres.pp, Linhom, sigma = 0.75, correction = "Ripley", verbose = F), 
     . - r ~ r, main = "Sigma 0.75")

## Spatial autocorrelation patterns:
# Only the R implementation lives here; detailed analysis is done in GeoDa;
as <- read.csv("D:/xh2/f1_points/xh/xh_as.csv")[,2:3]
bio1 <- raster("D:/xh2/f2_envs/BIO1.tif")
asv <- raster::extract(bio1,as)
ass <- cbind(asv,as)%>% na.omit() 
as <- ass[,2:3]

# Project helpers (point_wgs_utm, poly_wgs_utm come from sdmenm.R):
source("D:/HIDATA/项目代码xmind管理/share_code/sdmenm.R")

asutm <- point_wgs_utm(as)
range <- poly_wgs_utm(as)
plot(range)
points(asutm)

## Build spatial weights from inter-point distances:
ptdist=pointDistance(asutm)

min.dist<-min(ptdist); # Minimum
mean.dist<-mean(ptdist); # Mean

# NOTE(review): `nb` below is computed but never used; the k-nearest
# neighbour weights `w2` are what feed nb2listw -- confirm intent.
nb<-dnearneigh(coordinates(asutm),min.dist,mean.dist)

w2=knn2nb(knearneigh(asutm,k=6))
nb_lw<-nb2listw(w2,style="B") ## the style parameter may need adjusting:
## Inspect the point pattern and the neighbour links:
plot(nb_lw,coordinates(asutm),col="red")

## Global test of spatial clustering:
# NOTE(review): globalG.test is the Getis-Ord General G statistic, not
# Moran's I as the original comment claimed; also plot() on its htest
# result is questionable -- confirm both.
aa <- globalG.test(ass$asv,nb_lw,alternative="two.sided")
plot(aa)

## Local G (Getis-Ord) statistic per point:
local_g<-localG(ass$asv,nb_lw)
local_g <- as.matrix(local_g)
## Two-tailed p-values from the standard-normal z-scores:
# BUG FIX: the original referenced `loac$.`, an undefined object; the
# z-scores computed just above live in `local_g`.
pval<- 2*pnorm(-abs(local_g))
as$localg <- local_g 
as$pvalue <- pval
attach(as)
head(as)
# np = 1 marks significant points (p < 0.05), np = 2 non-significant:
as$np[pval>0.05]=2
as$np[pval<0.05]=1
detach(as)


attach(as)
# Map the local G z-scores (blue -> red) with point size tied to p-value
# (relies on attach(as) putting `pvalue` in scope):
ggplot(as,aes(x = longitude,y = latitude,colour = localg))+
  scale_colour_gradient(low = "blue",high = "red")+
  geom_point(size = pvalue)+
  scale_size_continuous(breaks = 0.05,guide = guide_legend()) 

# BUG FIX: the aesthetic was misspelled `shpae` in the original, so the
# shape mapping was silently dropped; `np` is also wrapped in factor()
# because ggplot2 refuses to map a continuous variable to shape.
ggplot(as,aes(x=longitude,y=latitude,colour=localg,shape=factor(np)))+
  geom_point(alpha=.5)+
  scale_colour_gradient(low = "blue",high = "red")+
  scale_size(breaks = c(1,2))

# results matching ""
#     No results matching ""
# (GitBook page footer residue, commented out -- invalid R syntax otherwise)